import cv2
import numpy as np
import glob
import os
import time
import pickle
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from skimage.feature import hog
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
# NOTE: train_test_split moved to sklearn.model_selection in scikit-learn 0.18;
# the old sklearn.cross_validation module was removed in 0.20.
from sklearn.model_selection import train_test_split
# NOTE: older scipy exposed this as scipy.ndimage.measurements.label;
# scipy.ndimage.label is the current location and works in both.
from scipy.ndimage import label
spatial_size=(32, 32)
hist_bins=32
orient=9
pix_per_cell=8
cell_per_block=2
hog_channel=0
spatial_feat=True
hist_feat=True
hog_feat=True
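# With these defaults a 64x64 training image produces, per channel,
# ((64 // 8) - 2 + 1)**2 * 2**2 * 9 = 7 * 7 * 4 * 9 = 1764 HOG features.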
# load the dataset
# path_dataset_vehicle = ./dataset/vehicles
# path_dataset_nonvehicle = ./dataset/non-vehicles
# vehicles
veh_gti_far = glob.glob('./dataset/vehicles/GTI_Far/*.png')
veh_gti_l = glob.glob('./dataset/vehicles/GTI_Left/*.png')
veh_gti_mid_close = glob.glob('./dataset/vehicles/GTI_MiddleClose/*.png')
veh_gti_r = glob.glob('./dataset/vehicles/GTI_Right/*.png')
veh_kitti = glob.glob('./dataset/vehicles/KITTI_extracted/*.png')
#non vehicles
nonveh_extras = glob.glob('./dataset/non-vehicles/Extras/*.png')
nonveh_gti = glob.glob('./dataset/non-vehicles/GTI/*.png')
# evaluate the dataset
len_veh_gti_far = len(veh_gti_far)
len_veh_gti_l = len(veh_gti_l)
len_veh_gti_mid_close = len(veh_gti_mid_close)
len_veh_gti_r = len(veh_gti_r)
len_veh_kitti = len(veh_kitti)
len_all_veh = len_veh_gti_far + len_veh_gti_l + len_veh_gti_mid_close \
+ len_veh_gti_r + len_veh_kitti
len_nonveh_extras = len(nonveh_extras)
len_nonveh_gti = len(nonveh_gti)
len_all_non_veh = len_nonveh_extras + len_nonveh_gti
print('veh_gti_far length: {}'.format(len_veh_gti_far))
print('veh_gti_l length: {}'.format(len_veh_gti_l))
print('veh_gti_mid_close length: {}'.format(len_veh_gti_mid_close))
print('veh_gti_r length: {}'.format(len_veh_gti_r))
print('veh_kitti length: {}'.format(len_veh_kitti))
print('nonveh_extras length: {}'.format(len_nonveh_extras))
print('nonveh_gti length: {}'.format(len_nonveh_gti))
print('Total Vehicles: {} Total Non Vehicles: {}'.format(len_all_veh, len_all_non_veh))
# shuffle and split the data
all_veh = veh_gti_far + veh_gti_l + veh_gti_mid_close \
+ veh_gti_r + veh_kitti
all_non_veh = nonveh_extras + nonveh_gti
%matplotlib inline
n_examples = 5
columns = 2
w = 16
h = 16
# fig=plt.figure(figsize=(180, 160), dpi= 80, facecolor='w', edgecolor='k')
fig, axes = plt.subplots(n_examples,columns, figsize=(w,h))
axes = axes.ravel()
for i in range(len(axes)):
axes[i].axis('off')
# iterate through the classes and pull together useful information
for i in range(n_examples):
    # np.random.randint's upper bound is exclusive, so use len(...) directly;
    # len(...) - 1 silently skipped the last sample in each class
    axes[i * 2].imshow(mpimg.imread(all_veh[np.random.randint(0, len(all_veh))]))
    axes[i * 2].set_title('vehicle')
    axes[i * 2 + 1].imshow(mpimg.imread(all_non_veh[np.random.randint(0, len(all_non_veh))]))
axes[i * 2 +1].set_title('non vehicle')
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
    # NOTE: skimage <= 0.15 spells the keyword 'visualise'; skimage >= 0.16
    # renamed it to 'visualize'. Adjust to match the installed version.
    if vis:
        features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
                                  visualise=True, feature_vector=False)
        return features, hog_image
    else:
        features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block), transform_sqrt=False,
                       visualise=False, feature_vector=feature_vec)
        return features
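# Minimal usage sketch (assumes `patch` is a 64x64 single-channel array):
#   vec = get_hog_features(patch, orient=9, pix_per_cell=8, cell_per_block=2)
#   vec.shape -> (1764,)  # 7*7 block positions * 2*2 cells * 9 orientations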
def plot_figure(array_to_plot, labels_array, n_rows, n_columns, figuresize=(64, 64), colourmap='gray'):
    fig, axes = plt.subplots(n_rows, n_columns, figsize=figuresize)
    axes = axes.ravel()
    for i in range(len(array_to_plot)):
        axes[i].imshow(array_to_plot[i], cmap=colourmap)
        axes[i].set_title(labels_array[i])
        axes[i].axis('off')
    plt.tight_layout()
    # Save before show: plt.show() clears the current figure, so calling
    # savefig() afterwards would write out a blank image.
    plt.savefig('./output_images/dataset_samples.png')
    plt.show()
def explore_hog(veh_img, gray):
    """Visualise a sample image, its colour channels, and the HOG output per channel."""
    rows = 2
    columns = 4
    w = 16
    h = 16
    fig, axes = plt.subplots(rows, columns, figsize=(w, h))
    plt.tight_layout()
    fig.subplots_adjust(hspace=.1, wspace=.5)
    axes = axes.ravel()
    # top row: the image and its three colour channels
    axes[0].set_title('vehicle')
    axes[0].imshow(veh_img)
    axes[1].set_title('chn0')
    axes[1].imshow(veh_img[:, :, 0], cmap='gray')
    axes[2].set_title('chn1')
    axes[2].imshow(veh_img[:, :, 1], cmap='gray')
    axes[3].set_title('chn2')
    axes[3].imshow(veh_img[:, :, 2], cmap='gray')
    # bottom row: HOG visualisations for the grayscale image and each channel
    features, hog_image = get_hog_features(gray, orient,
                                           pix_per_cell, cell_per_block,
                                           vis=True, feature_vec=False)
    axes[4].set_title('gray')
    axes[4].imshow(hog_image, cmap='gray')
    for ch in range(3):
        features, hog_image = get_hog_features(veh_img[:, :, ch], orient,
                                               pix_per_cell, cell_per_block,
                                               vis=True, feature_vec=False)
        axes[5 + ch].set_title('chn{}'.format(ch))
        axes[5 + ch].imshow(hog_image, cmap='gray')
def convert_color(img, cspace='YCrCb'):
    conversions = {
        'HSV': cv2.COLOR_RGB2HSV,
        'LUV': cv2.COLOR_RGB2LUV,
        'HLS': cv2.COLOR_RGB2HLS,
        'YUV': cv2.COLOR_RGB2YUV,
        'YCrCb': cv2.COLOR_RGB2YCrCb,
        'LAB': cv2.COLOR_RGB2LAB,
    }
    if cspace in conversions:
        return cv2.cvtColor(img, conversions[cspace])
    # 'RGB' (or any unrecognised string) falls back to a copy of the input;
    # the original if/elif chain could fall through and return None.
    return np.copy(img)
# Define a function to compute binned color features
def bin_spatial(img, size=(32, 32)):
# Use cv2.resize().ravel() to create the feature vector
features = cv2.resize(img, size).ravel()
# Return the feature vector
return features
# Define a function to compute color histogram features
def color_hist(img, nbins=32, bins_range=(0, 256)):
# Compute the histogram of the color channels separately
channel1_hist = np.histogram(img[:,:,0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(img[:,:,1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(img[:,:,2], bins=nbins, range=bins_range)
# Concatenate the histograms into a single feature vector
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
# Return the individual histograms, bin_centers and feature vector
return hist_features
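# NOTE: bins_range=(0, 256) assumes 8-bit pixel values. mpimg.imread() returns
# PNG data scaled to [0, 1] floats, so pass bins_range=(0, 1) (or rescale the
# image) when histogramming features from the PNG training set.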
# Define a function to extract features from a list of images
# Have this function call bin_spatial() and color_hist()
def extract_features_from_course(files, cspace='RGB', spatial_size=(32, 32),
hist_bins=32, hist_range=(0, 256)):
# Create a list to append feature vectors to
features = []
# Iterate through the list of images
for file in files:
# Read in each one by one
image = mpimg.imread(file)
# apply color conversion if other than 'RGB'
        # the file was read into `image`, not `img` (NameError in the original)
        feature_image = convert_color(image, cspace)
# Apply bin_spatial() to get spatial color features
spatial_features = bin_spatial(feature_image, size=spatial_size)
# Apply color_hist() also with a color space option now
hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)
# Append the new feature vector to the features list
features.append(np.concatenate((spatial_features, hist_features)))
# Return list of feature vectors
return features
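# Sketch of the vector length from this course helper under its defaults:
# spatial 32*32*3 = 3072 plus histograms 3*32 = 96, i.e. 3168 features per image.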
def extract_imgs_features(files, cspace='RGB', spatial_size=(32, 32),
                          hist_bins=32, orient=9,
                          pix_per_cell=8, cell_per_block=2, hog_channel=0,
                          spatial_feat=True, hist_feat=True, hog_feat=True,
                          flip=False):
    # Create a list to append feature vectors to
    features = []
    # Iterate through the list of images, passing the arguments through
    # instead of re-hardcoding them in the inner call (the original shadowed
    # every parameter except cspace and hog_channel with literals)
    for file in files:
        img = mpimg.imread(file)
        if flip:
            # also extract features from the horizontally mirrored image
            newfeatures, rev_newfeatures = single_img_features_with_flip(
                img, cspace=cspace, spatial_size=spatial_size,
                hist_bins=hist_bins, orient=orient,
                pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                hog_channel=hog_channel, spatial_feat=spatial_feat,
                hist_feat=hist_feat, hog_feat=hog_feat)
            features.append(newfeatures)
            features.append(rev_newfeatures)
        else:
            features.append(single_img_features(
                img, cspace=cspace, spatial_size=spatial_size,
                hist_bins=hist_bins, orient=orient,
                pix_per_cell=pix_per_cell, cell_per_block=cell_per_block,
                hog_channel=hog_channel, spatial_feat=spatial_feat,
                hist_feat=hist_feat, hog_feat=hog_feat))
    return features
# Define a function to extract features from a single image window
# This function is very similar to extract_features()
# just for a single image rather than list of images
def single_img_features(img, cspace='RGB', spatial_size=(32, 32),
hist_bins=32, orient=9,
pix_per_cell=8, cell_per_block=2, hog_channel=0,
spatial_feat=False, hog_feat=True, hist_feat=False):
#1) Define an empty list to receive features
img_features = []
# print('image shape: {}' .format(img.shape))
#2) Apply color conversion if other than 'RGB'
feature_image = convert_color(img, cspace)
#3) Compute spatial features if flag is set
if spatial_feat == True:
spatial_features = bin_spatial(feature_image, size=spatial_size)
#4) Append features to list
img_features.append(spatial_features)
#5) Compute histogram features if flag is set
if hist_feat == True:
hist_features = color_hist(feature_image, nbins=hist_bins)
#6) Append features to list
img_features.append(hist_features)
#7) Compute HOG features if flag is set
if hog_feat == True:
# print('hog_feat')
if hog_channel == 'ALL':
hog_features = []
for channel in range(feature_image.shape[2]):
# print('Using ALL channels'.format(hog_channel))
hog_features.extend(get_hog_features(feature_image[:,:,channel],
orient, pix_per_cell, cell_per_block,
vis=False, feature_vec=True))
else:
# print('Using single channel: {}'.format(hog_channel))
hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,
pix_per_cell, cell_per_block, vis=False, feature_vec=True)
#8) Append features to list
img_features.append(hog_features)
# print('img_features len: {}' .format(len(img_features)))
#9) Return concatenated array of features
return np.concatenate(img_features)
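# With the training configuration used below (HOG only, hog_channel='ALL'),
# single_img_features() returns 3 * 1764 = 5292 features per 64x64 image.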
# Variant of single_img_features() that also returns the features of the
# horizontally flipped image, doubling the effective training set
def single_img_features_with_flip(img, cspace='RGB', spatial_size=(32, 32),
                                  hist_bins=32, orient=9,
                                  pix_per_cell=8, cell_per_block=2, hog_channel=0,
                                  spatial_feat=False, hog_feat=True, hist_feat=False):
    #1) Define empty lists to receive features for both orientations
    img_features = []
    rev_img_features = []
    #2) Apply color conversion if other than 'RGB'
    feature_image = convert_color(img, cspace)
    reversed_image = cv2.flip(feature_image, 1)
    #3) Compute spatial features if flag is set, for both images, so the two
    #   returned vectors always have the same length
    if spatial_feat:
        img_features.append(bin_spatial(feature_image, size=spatial_size))
        rev_img_features.append(bin_spatial(reversed_image, size=spatial_size))
    #5) Compute histogram features if flag is set
    if hist_feat:
        img_features.append(color_hist(feature_image, nbins=hist_bins))
        rev_img_features.append(color_hist(reversed_image, nbins=hist_bins))
    #7) Compute HOG features if flag is set
    if hog_feat:
        if hog_channel == 'ALL':
            hog_features = []
            rev_hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
                rev_hog_features.extend(get_hog_features(reversed_image[:, :, channel],
                                                         orient, pix_per_cell, cell_per_block,
                                                         vis=False, feature_vec=True))
        else:
            hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                            pix_per_cell, cell_per_block, vis=False, feature_vec=True)
            # Bug fix: the flipped features must come from reversed_image,
            # not feature_image, otherwise both vectors are identical
            rev_hog_features = get_hog_features(reversed_image[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block, vis=False, feature_vec=True)
        #8) Append features to lists
        img_features.append(hog_features)
        rev_img_features.append(rev_hog_features)
    #9) Return concatenated arrays of features
    return np.concatenate(img_features), np.concatenate(rev_img_features)
def split_dataset(X, y):
    # Split up data into randomized training and test sets
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=rand_state)
    print('X_train len: {}'.format(len(X_train)))
    print('y_train len: {}'.format(len(y_train)))
    print('X_test len: {}'.format(len(X_test)))
    print('y_test len: {}'.format(len(y_test)))
    return X_train, X_test, y_train, y_test
def get_features(cspace, hog_channel):
    car_features = extract_imgs_features(cars, cspace=cspace, spatial_size=(32, 32),
                                         hist_bins=32, orient=9,
                                         pix_per_cell=8, cell_per_block=2, hog_channel=hog_channel,
                                         spatial_feat=False, hist_feat=False, hog_feat=True)
    notcar_features = extract_imgs_features(notcars, cspace=cspace, spatial_size=(32, 32),
                                            hist_bins=32, orient=9,
                                            pix_per_cell=8, cell_per_block=2, hog_channel=hog_channel,
                                            spatial_feat=False, hist_feat=False, hog_feat=True)
    print('done extracting features')
    # Create an array stack of feature vectors
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # The scaler is kept global so find_cars() / search_windows() can reuse it
    global X_scaler
    X_scaler = StandardScaler().fit(X)
    scaled_X = X_scaler.transform(X)
    # Define the labels vector: 1 for vehicles, 0 for non-vehicles
    y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
    print('car_features len: {}, notcar_features len: {}'.format(len(car_features), len(notcar_features)))
    return scaled_X, y
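# NOTE: fitting the StandardScaler on the full dataset before the train/test
# split leaks test statistics into training; strictly, the scaler should be
# fit on X_train only and then reused to transform X_test.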
def train_svc(X_train, y_train, X_test, y_test, svc):
    t = time.time()
    svc.fit(X_train, y_train)
    t2 = time.time()
    train_time = round(t2 - t, 2)
    print(train_time, 'Seconds to train SVC...')
    # Check the score of the SVC once and reuse the value
    acc = round(svc.score(X_test, y_test), 4)
    print('Test Accuracy of SVC = ', acc)
    return svc, acc, train_time
def predict_svc(X_test, y_test, svc):
    n_predict = 10
    t = time.time()
    print('My SVC predicts: \t', svc.predict(X_test[0:n_predict]))
    print('For these', n_predict, 'labels: \t', y_test[0:n_predict])
    t2 = time.time()
    print(round(t2 - t, 5), 'Seconds to predict', n_predict, 'labels with SVC')
    return svc
def train_predict(cspace, hog_channel, svc):
print('-----------------------------------------------------------')
    msg = 'Colourspace: {}, hog channel: {}'.format(cspace, hog_channel)
print(msg)
X, y = get_features(cspace, hog_channel )
X_train, X_test, y_train, y_test = split_dataset(X,y)
    # Train and evaluate the linear SVC passed in by the caller
    svc, acc, train_time = train_svc(X_train, y_train, X_test, y_test, svc)
    svc = predict_svc(X_test, y_test, svc)
    msg += ' - \taccuracy: {}\ttrain time: {}'.format(acc, train_time)
print(msg)
return svc, X, y
print('-----------------------------------------------------------')
# TODO play with these values to see how your classifier
# performs under different binning scenarios
spatial = 32
histbin = 32
print('running')
cars = all_veh
notcars = all_non_veh
global_colour_space = 'YCrCb'
global_channel = 'ALL'
X = None
y = None
# train_predict('YCrCb', 'ALL')
svc = LinearSVC()
sandbox = False
if sandbox:
cspaces = ['RGB', 'HLS', 'HSV', 'YUV', 'LUV', 'YCrCb', 'LAB']
channels = ['ALL', 0, 1,2]
for cspace in cspaces:
for channel in channels:
# call the svn
svc, X, y = train_predict(cspace, channel, svc)
else:
svc, X, y = train_predict(global_colour_space, global_channel, svc)
print('done')
def save_model(filename):
with open(filename, 'wb') as save_file:
pickle.dump(
{ 'svc':svc,
'scaler': X_scaler,
'orient': orient,
'pix_per_cell': pix_per_cell,
'cell_per_block': cell_per_block,
'spatial_size': spatial_size,
'hist_bins': hist_bins,
},
save_file, pickle.HIGHEST_PROTOCOL)
save_model('svc_pickle.p')
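# NOTE: the pickled scaler and parameters must match the feature configuration
# used at inference time; find_cars() below assumes HOG-only features scaled
# by this same X_scaler.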
# Start of the sliding window functionality
# Define a function you will pass an image
# and the list of windows to be searched (output of slide_windows())
def search_windows(img, windows, clf, scaler, color_space='RGB',
spatial_size=(32, 32), hist_bins=32,
hist_range=(0, 256), orient=9,
pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True,
hist_feat=True, hog_feat=True):
#1) Create an empty list to receive positive detection windows
on_windows = []
#2) Iterate over all windows in the list
for window in windows:
#3) Extract the test window from original image
test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
#4) Extract features for that window using single_img_features()
        features = single_img_features(test_img, cspace=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
#5) Scale extracted features to be fed to classifier
test_features = scaler.transform(np.array(features).reshape(1, -1))
#6) Predict using your classifier
prediction = clf.predict(test_features)
#7) If positive (prediction == 1) then save the window
if prediction == 1:
on_windows.append(window)
#8) Return windows for positive detections
return on_windows
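# Usage sketch (assumes a slide_window() helper, not defined in this notebook,
# that returns window corner pairs in ((x1, y1), (x2, y2)) form):
#   windows = slide_window(img, y_start_stop=[400, 656])
#   hits = search_windows(img, windows, svc, X_scaler, color_space='YCrCb',
#                         hog_channel='ALL', spatial_feat=False, hist_feat=False)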
#load data from files
with open('./svc_pickle.p', 'rb') as f:
    dist_pickle = pickle.load(f)
svc = dist_pickle["svc"]
X_scaler = dist_pickle["scaler"]
orient = dist_pickle["orient"]
pix_per_cell = dist_pickle["pix_per_cell"]
cell_per_block = dist_pickle["cell_per_block"]
spatial_size = dist_pickle["spatial_size"]
hist_bins = dist_pickle["hist_bins"]
# Define a function to draw bounding boxes
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
# Make a copy of the image
imcopy = np.copy(img)
# Iterate through the bounding boxes
for bbox in bboxes:
# Draw a rectangle given bbox coordinates
cv2.rectangle(imcopy, bbox[0], bbox[1], color, thick)
# Return the image copy with boxes drawn
return imcopy
# extracted the code to get the searchable portion of the image into its own method
def get_image_to_search(full_img, ystart, ystop):
    img_tosearch = full_img[ystart:ystop, :, :]
    # convert_color() expects one of the named colour spaces; the original
    # passed 'RGB2YCrCb', which matched no conversion
    ctrans_tosearch = convert_color(img_tosearch, cspace='YCrCb')
    return ctrans_tosearch
#
img = mpimg.imread('./test_images/test1.jpg')
plt.imshow(img)
print('image shape: {}'.format(img.shape))
class Rect():
    def __init__(self, xbox_left, ytop_draw, win_draw, ystart):
        self.image_raw = False
        self.xbox_left = xbox_left
        self.ytop_draw = ytop_draw
        self.win_draw = win_draw
        # box in the ((x1, y1), (x2, y2)) format the heat map expects;
        # ystart is passed in explicitly rather than read from a global
        self.box = ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart))
# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):
posi_boxes = []
negi_boxes = []
# print('image shape: {}'.format(img.shape))
draw_img = np.copy(img)
    # training images are PNGs that mpimg reads as [0, 1] floats; the test
    # JPEGs arrive as [0, 255], so rescale to match the training distribution
    img = img.astype(np.float32) / 255
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, cspace=global_colour_space)
if scale != 1:
imshape = ctrans_tosearch.shape
        # np.int was removed in NumPy 1.24; use the builtin int
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1] / scale), int(imshape[0] / scale)))
ch1 = ctrans_tosearch[:,:,0]
ch2 = ctrans_tosearch[:,:,1]
ch3 = ctrans_tosearch[:,:,2]
# Define blocks and steps as above
nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
nfeat_per_block = orient*cell_per_block**2
# 64 was the orginal sampling rate, with 8 cells and 8 pix per cell
window = 64
nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
# print('nblocks_per_window: {}' .format(nblocks_per_window))
cells_per_step = 1 # Instead of overlap, define how many cells to step
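    # With window=64 and pix_per_cell=8 there are 8 cells per window edge, so
    # cells_per_step=1 moves the window 8 px at a time: 87.5% overlap between
    # neighbouring windows (cells_per_step=2 would give 75%).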
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
nysteps = (nyblocks - nblocks_per_window) // cells_per_step
# Compute individual channel HOG features for the entire image
hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb*cells_per_step
xpos = xb*cells_per_step
# Extract HOG for this patch
if (global_channel == 0):
hog_features = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif (global_channel == 1):
hog_features = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif (global_channel == 2):
hog_features = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
elif(global_channel == 'ALL'):
# print('ALL')
hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))
xleft = xpos*pix_per_cell
ytop = ypos*pix_per_cell
            # scale the HOG-only feature vector with the scaler fitted at
            # training time, then classify the patch
            test_features = X_scaler.transform(hog_features.reshape(1, -1))
            test_prediction = svc.predict(test_features)
            xbox_left = int(xleft * scale)
            ytop_draw = int(ytop * scale)
            win_draw = int(window * scale)
            box = ((xbox_left, ytop_draw + ystart), (xbox_left + win_draw, ytop_draw + win_draw + ystart))
            if test_prediction == 1:
                posi_boxes.append(box)
            else:
                negi_boxes.append(box)
    # draw rejected windows in blue and accepted windows in red
    for box in negi_boxes:
        cv2.rectangle(draw_img, box[0], box[1], (0, 0, 255), 6)
    for box in posi_boxes:
        cv2.rectangle(draw_img, box[0], box[1], (255, 0, 0), 6)
return draw_img, posi_boxes, negi_boxes
# get the boxes for the found cars in an image, scanning at several scales
def get_boxes_from_image(img):
    # small windows near the horizon
    ystart = 400
    ystop = 500
    scale = 1
    out_img_1, posi_boxes_1, neg_boxes_1 = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
    # larger windows over the full lower half of the frame
    ystart = 400
    ystop = 656
    scale = 1.5
    out_img_15, posi_boxes_15, neg_boxes_15 = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
    scale = 2
    out_img_2, posi_boxes_2, neg_boxes_2 = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
    # scales 3 and 3.5 (with ystart=340) were tried and disabled; re-enable
    # them by adding further find_cars() calls here
    all_posi_boxes = posi_boxes_1 + posi_boxes_15 + posi_boxes_2
    return all_posi_boxes, out_img_1, out_img_15, out_img_2
all_posi_boxes, out_img_1, out_img_15, out_img_2 = get_boxes_from_image(img)
# plt.imshow(out_img)
fig = plt.figure()
fig.set_figheight(15)
fig.set_figwidth(15)
plt.subplot(611)
plt.imshow(img)
plt.title('Input Image')
plt.subplot(612)
plt.imshow(out_img_1)
plt.title('Windows-Scale 1')
plt.subplot(613)
plt.imshow(out_img_15)
plt.title('Windows-Scale 1.5')
plt.subplot(614)
plt.imshow(out_img_2)
plt.title('Windows-Scale 2')
plt.savefig('./output_images/windows_all.png',bbox_inches='tight')
filename = 'bbox_pickle.p'
with open(filename, 'wb') as save_file:
    # write through the already-open handle; the original opened the file a
    # second time inside pickle.dump()
    pickle.dump(all_posi_boxes, save_file)
# Read in a pickle file with bboxes saved
# Each item in the "all_bboxes" list will contain a
# list of boxes for one of the images shown above
with open('bbox_pickle.p', 'rb') as f:
    all_posi_boxes = pickle.load(f)
def add_heat(heatmap, boxes):
# Iterate through list of bboxes
for box in boxes:
# Add += 1 for all pixels inside each bbox
# Assuming each "box" takes the form ((x1, y1), (x2, y2))
heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1
# Return updated heatmap
    return heatmap
# apply a threshold to heatmap. Anything less that the threshold is discarded.
def apply_threshold(heatmap, threshold):
# Zero out pixels below the threshold
heatmap[heatmap <= threshold] = 0
# Return thresholded map
return heatmap
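# e.g. with threshold=6 a pixel must be covered by at least 7 positive windows
# (across the pooled scales/frames) to survive into the final heat map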
# draw bounding box using the max and min of the labels
#the labels were created from the heatmaps
def draw_labeled_bboxes(img, labels):
    # labels is the (labeled_array, num_features) pair returned by
    # scipy.ndimage.label(); car_number 0 is the background
    for car_number in range(1, labels[1] + 1):
# Find pixels with each car_number label value
nonzero = (labels[0] == car_number).nonzero()
# Identify x and y values of those pixels
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Define a bounding box based on min/max x and y
bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
# Draw the box on the image
cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
# Return the image
return img
# get the bounding boxes of each car by creating heat maps and thresholding
def get_bb_via_heat(img, boxes, video=False):
    # float heat map so repeated += 1 never saturates a uint8 image dtype
    heat = np.zeros_like(img[:, :, 0]).astype(np.float64)
    threshold = 6
    if video:
        # for video, pool the boxes of the last few frames and raise the
        # threshold accordingly to smooth out single-frame false positives
        threshold = 15
        no_of_frames_to_avg = 10
        recent_frame_boxes.append(list(boxes))
        if len(recent_frame_boxes) > no_of_frames_to_avg:
            del recent_frame_boxes[0]
        boxes = []
        for frame_boxes in recent_frame_boxes:
            boxes = boxes + frame_boxes
    # Add heat to each box in the box list
    heat = add_heat(heat, boxes)
    # Apply threshold to help remove false positives
    heat = apply_threshold(heat, threshold)
    # Visualize the heatmap when displaying
    heatmap = np.clip(heat, 0, 255)
    # Find final boxes from heatmap using the label function
    labels = label(heatmap)
    draw_img = draw_labeled_bboxes(np.copy(img), labels)
    return draw_img, heatmap
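# NOTE: recent_frame_boxes is a module-level list used as rolling state across
# video frames; it must be reset (as below) before processing a new clip.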
# initialise the rolling frame-box history before any processing
recent_frame_boxes = []
draw_img, heatmap = get_bb_via_heat(img, all_posi_boxes)
fig = plt.figure()
fig.set_figheight(32)
fig.set_figwidth(32)
plt.subplot(121)
plt.imshow(draw_img)
plt.title('Car Positions')
plt.subplot(122)
plt.imshow(heatmap, cmap='hot')
plt.title('Heat Map')
fig.tight_layout()
plt.savefig('./output_images/heatmap.png',bbox_inches='tight')
# process a single still image: detect boxes, then merge them via the heat map
def process_image(img):
    all_posi_boxes, out_img_1, out_img_15, out_img_2 = get_boxes_from_image(img)
    draw_img, heatmap = get_bb_via_heat(img, all_posi_boxes, False)
    return draw_img
# video variant: enables the multi-frame heat-map smoothing
def process_image_video(img):
    all_posi_boxes, out_img_1, out_img_15, out_img_2 = get_boxes_from_image(img)
    draw_img, heatmap = get_bb_via_heat(img, all_posi_boxes, True)
    return draw_img
# parse the test images and call the process image method
test_image_files = glob.glob('./test_images/*.jpg')
processed_imgs = []
num_imgs = len(test_image_files)
fig = plt.figure()
subplot_ind = 11 + num_imgs * 100
fig.set_figheight(32)
fig.set_figwidth(32)
for file in test_image_files:
    print('processing file: {}'.format(file))
    img = mpimg.imread(file)
plt.subplot(subplot_ind)
plt.title(os.path.basename(file))
processed_img = process_image(img)
plt.imshow(processed_img)
subplot_ind += 1
fig.tight_layout()
plt.savefig('./output_images/test_images_output.png',bbox_inches='tight')
# process a video, which in turn call the process_image method
from moviepy.editor import VideoFileClip
from IPython.display import HTML
recent_frame_boxes = []
def process_video(input_video_filename, output_video_filename):
    clip1 = VideoFileClip(input_video_filename)
    # fl_image applies process_image_video to every frame of the clip
    output_clip = clip1.fl_image(process_image_video)
    output_clip.write_videofile(output_video_filename, audio=False)
# output_video_filename = 'challenge_video_output.mp4'
output_video_filename = 'project_video_output.mp4'
# input_video_filename = 'test_video.mp4'
input_video_filename = 'project_video.mp4'
# input_video_filename = 'challenge_video.mp4'
process_video(input_video_filename, output_video_filename)
# HTML("""
# <video width="1280" height="720" controls>
# <source src="{0}">
# </video>
# """.format(output_video_filename))